udelay(10);
}
- tw32(HOSTCC_RXCOL_TICKS, 0);
- tw32(HOSTCC_RXMAX_FRAMES, 1);
- tw32(HOSTCC_RXCOAL_TICK_INT, 0);
- tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
- tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
- tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
- tw32(HOSTCC_TXCOAL_TICK_INT, 0);
- tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+ // akw: restored all of these to the default interrupt-coalescing values.
+
+ tw32(HOSTCC_RXCOL_TICKS, DEFAULT_RXCOL_TICKS); //0);
+ tw32(HOSTCC_RXMAX_FRAMES, DEFAULT_RXMAX_FRAMES); //1);
+ tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT); //, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, DEFAULT_RXCOAL_MAXF_INT); //, 1);
+ tw32(HOSTCC_TXCOL_TICKS, DEFAULT_TXCOL_TICKS); //, LOW_TXCOL_TICKS);
+ tw32(HOSTCC_TXMAX_FRAMES, DEFAULT_TXMAX_FRAMES); //, LOW_RXMAX_FRAMES);
+ tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT); //, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, DEFAULT_TXCOAL_MAXF_INT); //, 0);
tw32(HOSTCC_STAT_COAL_TICKS,
DEFAULT_STAT_COAL_TICKS);
}
tp->rx_offset = 2;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ printk("WARNING: This card may not support unaligned receive pointers.\n");
+ //tp->rx_offset = 0;
/* By default, disable wake-on-lan. User can change this
* using ETHTOOL_SWOL.
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define RX_COPY_THRESHOLD 256
+#define RX_COPY_THRESHOLD 0 //256
#define RX_STD_MAX_SIZE 1536
#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
BUG();
flush_write_buffers();
- if ((unsigned long) ptr > PAGE_OFFSET)
- return virt_to_bus(ptr);
-
- /* If an address that is not in hypervisor VM is passed to this
- * function (ie > PAGE_OFFSET) we assume that the passer knows
- * what they are doing, and have passed a physical address that
- * should not be converted here. This is a little hackish, but
- * is being added to allow references to domain memory in order
- * to support zero-copy network code.
- */
-
- return (dma_addr_t) ptr;
+ return virt_to_bus(ptr);
}
/* Unmap a single streaming mode DMA translation. The dma_addr and size
rx = shadow_ring->rx_ring+i;
if ( (skb->len + ETH_HLEN) < rx->size )
rx->size = skb->len + ETH_HLEN;
-
-
g_pte = map_domain_mem(rx->addr);
g_pfn = frame_table + (*g_pte >> PAGE_SHIFT);
h_pfn = skb->pf;
- //flip and/or set relevant pf_info fields.
- //tmp = g_pfn->next; g_pfn->next = h_pfn->next; h_pfn->next = tmp;
- //tmp = g_pfn->prev; g_pfn->prev = h_pfn->prev; h_pfn->prev = tmp;
- //tmp = g_pfn->flags; g_pfn->flags = h_pfn->flags; h_pfn->flags = tmp;
h_pfn->tot_count = h_pfn->type_count = 1;
g_pfn->tot_count = g_pfn->type_count = 0;
h_pfn->flags = g_pfn->flags & (~PG_type_mask);
-//if (h_pfn->flags & PG_domain_mask) printk("deliver packet to dom %lu\n", (h_pfn->flags & PG_domain_mask));
+
if (*g_pte & _PAGE_RW) h_pfn->flags |= PGT_writeable_page;
g_pfn->flags = 0;
+
//point guest pte at the new page:
-
-//printk("newmpfn: %lx, old mpfn: %lx, old:(%lx) new:(%lx)\n", h_pfn - frame_table, *g_pte >> PAGE_SHIFT, machine_to_phys_mapping[h_pfn - frame_table], machine_to_phys_mapping[*g_pte >> PAGE_SHIFT]);
-
machine_to_phys_mapping[h_pfn - frame_table]
= machine_to_phys_mapping[g_pfn - frame_table];
get_fast_time(&skb->stamp);
if ( (skb->data - skb->head) != (18 + ETH_HLEN) )
- BUG();
+ printk("headroom was %lu!\n", (unsigned long)skb->data - (unsigned long)skb->head);
+ // BUG();
skb->head = (u8 *)map_domain_mem(((skb->pf - frame_table) << PAGE_SHIFT));
if ( skb->dst_vif == VIF_UNKNOWN_INTERFACE )
skb->dst_vif = __net_get_target_vif(skb->mac.raw, skb->len, skb->src_vif);
-if (skb->dst_vif == VIF_DROP)
-printk("netif_rx target: %d (sec: %u)\n", skb->dst_vif, skb->security);
+//if (skb->dst_vif == VIF_DROP)
+//printk("netif_rx target: %d (sec: %u)\n", skb->dst_vif, skb->security);
if ( (vif = sys_vif_list[skb->dst_vif]) == NULL )
{
+//printk("No such vif! (%d).\n", skb->dst_vif);
// the target vif does not exist.
goto drop;
}
{
// Local delivery: Allocate an skb off the domain free list
// fil it, and pass it to netif_rx as if it came off the NIC.
-
+//printk("LOCAL! (%d) \n", target);
skb = dev_alloc_skb(tx.size);
if (skb == NULL)
{
skb->src_vif = current_vif->id;
skb->dst_vif = target;
skb->protocol = protocol;
- if (copy_to_user(skb->data, g_data, tx.size))
- {
- unmap_domain_mem(g_data);
- continue;
- }
+
+ skb->head = (u8 *)map_domain_mem(((skb->pf - frame_table) << PAGE_SHIFT));
+ skb->data = skb->head + 16;
+ skb_reserve(skb,2);
+ memcpy(skb->data, g_data, tx.size);
+ skb->len = tx.size;
+ unmap_domain_mem(skb->head);
+ skb->data += ETH_HLEN; // skip past the Ethernet header so the length assertion in netif_rx holds.
- (void)netif_rx(skb); // why is there a void here? It's from the old code.
+ (void)netif_rx(skb);
unmap_domain_mem(g_data);
}
#include <asm/uaccess.h>
#include <asm/system.h>
+#include <asm/io.h>
#define BUG_TRAP ASSERT
kmem_cache_free(skbuff_head_cache, skb);
}
+//static unsigned long skbpagesout=0, skbpagesin=0;
+
static inline u8 *alloc_skb_data_page(struct sk_buff *skb)
{
struct list_head *list_ptr;
spin_unlock_irqrestore(&free_list_lock, flags);
skb->pf = pf;
+//if (skbpagesout++ % 100 == 0) printk("XEN-: skb allocs: %lu\n", skbpagesout);
return (u8 *)((pf - frame_table) << PAGE_SHIFT);
}
free_pfns++;
spin_unlock_irqrestore(&free_list_lock, flags);
+
+//if (skbpagesin++ % 100 == 0) printk("XEN-: skb allocs: %lu\n", skbpagesin);
}
struct sk_buff *alloc_zc_skb(unsigned int size,int gfp_mask)
if (data == NULL)
goto nodata;
+ // Convert to a virtual address so that pci_map_single in the driver
+ // does the right thing. If you ever want to use this pointer for anything
+ // else, you must regenerate it from skb->pf.
+ data = phys_to_virt((unsigned long)data);
+
/* XXX: does not include slab overhead */
skb->truesize = size + sizeof(struct sk_buff);
#define TX_RING_ADD(_i,_j) (((_i)+(_j)) & (TX_RING_SIZE-1))
#define RX_RING_ADD(_i,_j) (((_i)+(_j)) & (RX_RING_SIZE-1))
-#define RX_BUF_SIZE 1600 /* Ethernet MTU + plenty of slack! */
+#define RX_BUF_SIZE 2049 /* (was 1600) Ethernet MTU + plenty of slack! */
static void network_rx_int(int irq, void *dev_id, struct pt_regs *ptregs);
static void network_tx_int(int irq, void *dev_id, struct pt_regs *ptregs);
{
struct sk_buff *skb;
- //skb = alloc_skb(length+16, gfp_mask);
- skb = alloc_zc_skb(length+16, gfp_mask);
+ skb = alloc_skb(length+16, gfp_mask);
+ //skb = alloc_zc_skb(length+16, gfp_mask);
if (skb)
skb_reserve(skb,16);
return skb;